/* Mem sharing: unshare the page and try again */
if ( npfec.write_access && (p2mt == p2m_ram_shared) )
{
- ASSERT(!p2m_is_nestedp2m(p2m));
+ ASSERT(p2m_is_hostp2m(p2m));
sharing_enomem =
(mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0);
rc = 1;
q);
if ( p2m_is_paging(*p2mt) )
{
- ASSERT(!p2m_is_nestedp2m(p2m));
+ ASSERT(p2m_is_hostp2m(p2m));
if ( page )
put_page(page);
p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
&p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
- ASSERT(!p2m_is_nestedp2m(p2m));
+ ASSERT(p2m_is_hostp2m(p2m));
pfec[0] = PFEC_page_paged;
if ( top_page )
put_page(top_page);
put_page(page);
if ( p2m_is_paging(p2mt) )
{
- ASSERT(!p2m_is_nestedp2m(p2m));
+ ASSERT(p2m_is_hostp2m(p2m));
pfec[0] = PFEC_page_paged;
p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
return INVALID_GFN;
if ( needs_sync != sync_off )
ept_sync_domain(p2m);
- /* For non-nested p2m, may need to change VT-d page table.*/
- if ( rc == 0 && !p2m_is_nestedp2m(p2m) && need_iommu(d) &&
+ /* For host p2m, may need to change VT-d page table. */
+ if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) &&
need_modify_vtd_table )
{
if ( iommu_hap_pt_share )
p2m->domain = d;
p2m->default_access = p2m_access_rwx;
+ p2m->p2m_class = p2m_host;
p2m->np2m_base = P2M_BASE_EADDR;
p2m_teardown_nestedp2m(d);
return -ENOMEM;
}
+ p2m->p2m_class = p2m_nested;
p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
}
int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start,
unsigned long end)
{
- ASSERT(!p2m_is_nestedp2m(p2m));
+ ASSERT(p2m_is_hostp2m(p2m));
if ( p2m->global_logdirty ||
rangeset_contains_range(p2m->logdirty_ranges, start, end) )
return 1;
if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
{
- ASSERT(!p2m_is_nestedp2m(p2m));
+ ASSERT(p2m_is_hostp2m(p2m));
/* Try to unshare. If we fail, communicate ENOMEM without
* sleeping. */
if ( mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0 )
p2m_lock(p2m);
- if ( !p2m_is_nestedp2m(p2m)
+ if ( p2m_is_hostp2m(p2m)
&& !page_list_empty(&d->page_list) )
{
P2M_ERROR("dom %d already has memory allocated\n", d->domain_id);
/* "Host" p2m tables can have shared entries &c that need a bit more
* care when discarding them */
- ASSERT(p2m_is_nestedp2m(p2m));
+ ASSERT(!p2m_is_hostp2m(p2m));
/* Nested p2m's do not do pod, hence the asserts (and no pod lock)*/
ASSERT(page_list_empty(&p2m->pod.super));
ASSERT(page_list_empty(&p2m->pod.single));
(P2M_RAM_TYPES | P2M_GRANT_TYPES | \
p2m_to_mask(p2m_map_foreign)))
+typedef enum {
+ p2m_host,
+ p2m_nested,
+} p2m_class_t;
+
/* Per-p2m-table state */
struct p2m_domain {
/* Lock that protects updates to the p2m */
struct domain *domain; /* back pointer to domain */
+ p2m_class_t p2m_class; /* host/nested/? */
+
/* Nested p2ms only: nested p2m base value that this p2m shadows.
* This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
* needs both the per-p2m lock and the per-domain nestedp2m lock
*/
struct p2m_domain *p2m_get_p2m(struct vcpu *v);
-#define p2m_is_nestedp2m(p2m) ((p2m) != p2m_get_hostp2m((p2m->domain)))
+static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
+{
+ return p2m->p2m_class == p2m_host;
+}
+
+static inline bool_t p2m_is_nestedp2m(const struct p2m_domain *p2m)
+{
+ return p2m->p2m_class == p2m_nested;
+}
#define p2m_get_pagetable(p2m) ((p2m)->phys_table)